extern void initialize_keytable();
extern int do_timer_lists_from_pit;
-char ignore_irq13; /* set if exception 16 works */
+char ignore_irq13; /* set if exception 16 works */
struct cpuinfo_x86 boot_cpu_data = { 0, 0, 0, 0, -1 };
#if defined(__x86_64__)
struct exec_domain *idle_task[NR_CPUS] = { &idle0_exec_domain };
-#ifdef CONFIG_ACPI_INTERPRETER
+#ifdef CONFIG_ACPI_INTERPRETER
int acpi_disabled = 0;
#else
int acpi_disabled = 1;
{
int nr = smp_processor_id();
struct tss_struct *t = &init_tss[nr];
- unsigned char idt_load[10];
if ( test_and_set_bit(nr, &cpu_initialized) )
panic("CPU#%d already initialized!!!\n", nr);
SET_GDT_ADDRESS(current, DEFAULT_GDT_ADDRESS);
__asm__ __volatile__ ( "lgdt %0" : "=m" (*current->arch.gdt) );
- *(unsigned short *)(&idt_load[0]) = (IDT_ENTRIES*sizeof(idt_entry_t))-1;
- *(unsigned long *)(&idt_load[2]) = (unsigned long)idt_tables[nr];
- __asm__ __volatile__ ( "lidt %0" : "=m" (idt_load) );
-
/* No nested task. */
__asm__ __volatile__ ( "pushf ; andw $0xbfff,(%"__OP"sp) ; popf" );
APIC_init_uniprocessor();
#else
if ( opt_nosmp )
- APIC_init_uniprocessor();
+ APIC_init_uniprocessor();
else
smp_boot_cpus();
/*
static int cpucount;
+#ifdef __i386__
+static void construct_percpu_idt(unsigned int cpu)
+{
+ unsigned char idt_load[10];
+
+ idt_tables[cpu] = xmalloc_array(idt_entry_t, IDT_ENTRIES);
+ memcpy(idt_tables[cpu], idt_table, IDT_ENTRIES*sizeof(idt_entry_t));
+
+ *(unsigned short *)(&idt_load[0]) = (IDT_ENTRIES*sizeof(idt_entry_t))-1;
+ *(unsigned long *)(&idt_load[2]) = (unsigned long)idt_tables[cpu];
+ __asm__ __volatile__ ( "lidt %0" : "=m" (idt_load) );
+}
+#endif
+
/*
* Activate a secondary processor.
*/
set_current(idle_task[cpu]);
- /*
- * At this point, boot CPU has fully initialised the IDT. It is
- * now safe to make ourselves a private copy.
- */
- idt_tables[cpu] = xmalloc_array(idt_entry_t, IDT_ENTRIES);
- memcpy(idt_tables[cpu], idt_table, IDT_ENTRIES*sizeof(idt_entry_t));
-
percpu_traps_init();
cpu_init();
while (!atomic_read(&smp_commenced))
rep_nop();
+#ifdef __i386__
/*
- * low-memory mappings have been cleared, flush them from the local TLBs
- * too.
+ * At this point, boot CPU has fully initialised the IDT. It is
+ * now safe to make ourselves a private copy.
*/
+ construct_percpu_idt(cpu);
+#endif
+
local_flush_tlb();
startup_cpu_idle_loop();
#endif
string_param("nmi", opt_nmi);
-asmlinkage int hypercall(void);
-
-/* Master table, and the one used by CPU0. */
+/* Master table, used by all CPUs on x86/64, and by CPU0 on x86/32.*/
idt_entry_t idt_table[IDT_ENTRIES] = { {0, 0}, };
-/* All other CPUs have their own copy. */
-idt_entry_t *idt_tables[NR_CPUS] = { 0 };
asmlinkage void divide_error(void);
asmlinkage void debug(void);
return EXCRET_not_a_fault;
}
-BUILD_SMP_INTERRUPT(deferred_nmi, TRAP_deferred_nmi)
-asmlinkage void smp_deferred_nmi(struct xen_regs regs)
-{
- ack_APIC_irq();
-    do_nmi(&regs, 0);
-}
-
void set_intr_gate(unsigned int n, void *addr)
{
_set_gate(idt_table+n,14,0,addr);
set_intr_gate(TRAP_alignment_check,&alignment_check);
set_intr_gate(TRAP_machine_check,&machine_check);
set_intr_gate(TRAP_simd_error,&simd_coprocessor_error);
- set_intr_gate(TRAP_deferred_nmi,&deferred_nmi);
-
-#if defined(__i386__)
- _set_gate(idt_table+HYPERCALL_VECTOR, 14, 1, &hypercall);
-#endif
-
- /* CPU0 uses the master IDT. */
- idt_tables[0] = idt_table;
percpu_traps_init();
#include <xen/irq.h>
#include <asm/flushtlb.h>
+/* All CPUs have their own IDT to allow set_fast_trap(). */
+idt_entry_t *idt_tables[NR_CPUS] = { 0 };
+
static int kstack_depth_to_print = 8*20;
static inline int kernel_text_address(unsigned long addr)
__asm__ __volatile__ ( "hlt" );
}
+BUILD_SMP_INTERRUPT(deferred_nmi, TRAP_deferred_nmi)
+asmlinkage void smp_deferred_nmi(struct xen_regs regs)
+{
+ asmlinkage void do_nmi(struct xen_regs *, unsigned long);
+ ack_APIC_irq();
+    do_nmi(&regs, 0);
+}
+
void __init percpu_traps_init(void)
{
- if ( smp_processor_id() == 0 )
- {
- /*
- * Make a separate task for double faults. This will get us debug
- * output if we blow the kernel stack.
- */
- struct tss_struct *tss = &doublefault_tss;
- memset(tss, 0, sizeof(*tss));
- tss->ds = __HYPERVISOR_DS;
- tss->es = __HYPERVISOR_DS;
- tss->ss = __HYPERVISOR_DS;
- tss->esp = (unsigned long)
- &doublefault_stack[DOUBLEFAULT_STACK_SIZE];
- tss->__cr3 = __pa(idle_pg_table);
- tss->cs = __HYPERVISOR_CS;
- tss->eip = (unsigned long)do_double_fault;
- tss->eflags = 2;
- tss->bitmap = IOBMP_INVALID_OFFSET;
- _set_tssldt_desc(gdt_table+__DOUBLEFAULT_TSS_ENTRY,
- (unsigned long)tss, 235, 9);
- }
+ asmlinkage int hypercall(void);
+
+ if ( smp_processor_id() != 0 )
+ return;
+
+ /* CPU0 uses the master IDT. */
+ idt_tables[0] = idt_table;
+
+ /* The hypercall entry vector is only accessible from ring 1. */
+ _set_gate(idt_table+HYPERCALL_VECTOR, 14, 1, &hypercall);
+
+ set_intr_gate(TRAP_deferred_nmi, &deferred_nmi);
+
+ /*
+ * Make a separate task for double faults. This will get us debug output if
+ * we blow the kernel stack.
+ */
+ struct tss_struct *tss = &doublefault_tss;
+ memset(tss, 0, sizeof(*tss));
+ tss->ds = __HYPERVISOR_DS;
+ tss->es = __HYPERVISOR_DS;
+ tss->ss = __HYPERVISOR_DS;
+ tss->esp = (unsigned long)
+ &doublefault_stack[DOUBLEFAULT_STACK_SIZE];
+ tss->__cr3 = __pa(idle_pg_table);
+ tss->cs = __HYPERVISOR_CS;
+ tss->eip = (unsigned long)do_double_fault;
+ tss->eflags = 2;
+ tss->bitmap = IOBMP_INVALID_OFFSET;
+ _set_tssldt_desc(gdt_table+__DOUBLEFAULT_TSS_ENTRY,
+ (unsigned long)tss, 235, 9);
set_task_gate(TRAP_double_fault, __DOUBLEFAULT_TSS_ENTRY<<3);
}
char *stack_bottom, *stack;
int cpu = smp_processor_id();
+ if ( cpu == 0 )
+ {
+ /* Specify dedicated interrupt stacks for NMIs and double faults. */
+ set_intr_gate(TRAP_double_fault, &double_fault);
+ idt_table[TRAP_double_fault].a |= 1UL << 32; /* IST1 */
+ idt_table[TRAP_nmi].a |= 2UL << 32; /* IST2 */
+ }
+
stack_bottom = (char *)get_stack_bottom();
stack = (char *)((unsigned long)stack_bottom & ~(STACK_SIZE - 1));
/* Double-fault handler has its own per-CPU 1kB stack. */
init_tss[cpu].ist[0] = (unsigned long)&stack[1024];
- set_intr_gate(TRAP_double_fault, &double_fault);
- idt_tables[cpu][TRAP_double_fault].a |= 1UL << 32; /* IST1 */
/* NMI handler has its own per-CPU 1kB stack. */
init_tss[cpu].ist[1] = (unsigned long)&stack[2048];
- idt_tables[cpu][TRAP_nmi].a |= 2UL << 32; /* IST2 */
/*
* Trampoline for SYSCALL entry from long mode.